Getting data from Kaggle

In [ ]:
!pip install kaggle
Requirement already satisfied: kaggle in /usr/local/lib/python3.6/dist-packages (1.5.6)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from kaggle) (2.23.0)
Requirement already satisfied: certifi in /usr/local/lib/python3.6/dist-packages (from kaggle) (2020.6.20)
Requirement already satisfied: python-dateutil in /usr/local/lib/python3.6/dist-packages (from kaggle) (2.8.1)
Requirement already satisfied: six>=1.10 in /usr/local/lib/python3.6/dist-packages (from kaggle) (1.12.0)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from kaggle) (1.24.3)
Requirement already satisfied: python-slugify in /usr/local/lib/python3.6/dist-packages (from kaggle) (4.0.0)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from kaggle) (4.41.1)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->kaggle) (2.9)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->kaggle) (3.0.4)
Requirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.6/dist-packages (from python-slugify->kaggle) (1.3)
In [ ]:
from google.colab import files
files.upload()
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving kaggle.json to kaggle.json
Out[ ]:
{'kaggle.json': b'{"username":"amitlevizky","key":"8496db359317247ebfec0b7dcd9137b4"}'}
In [ ]:
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
In [ ]:
!kaggle datasets download -d zaraks/pascal-voc-2007
Downloading pascal-voc-2007.zip to /content
100% 1.65G/1.65G [00:21<00:00, 96.9MB/s]
100% 1.65G/1.65G [00:21<00:00, 81.7MB/s]
In [ ]:
!ls
kaggle.json  pascal-voc-2007.zip  sample_data
In [ ]:
import zipfile
# extract the dataset into the "files" directory
with zipfile.ZipFile("pascal-voc-2007.zip", 'r') as zip_ref:
  zip_ref.extractall("files")

Plotting methods for the loss and PSNR metrics

In [ ]:
from keras.layers import Dense
import matplotlib.pyplot as plt

def plot_loss(history):
  plt.plot(history.history['loss'])
  plt.plot(history.history['val_loss'])
  plt.title('model loss')
  plt.ylabel('loss')
  plt.xlabel('epoch')
  plt.legend(['train', 'val'], loc='upper left')
  plt.show()

def plot_PSNR(history):
  psnr_str = get_psnr(history.history.keys())
  psnr_val_str = get_psnr_val(history.history.keys())
  plt.plot(history.history[psnr_str])
  plt.plot(history.history[psnr_val_str])
  plt.title('PSNR')
  plt.ylabel('PSNR')
  plt.xlabel('epoch')
  plt.legend(['train', 'val'], loc='upper left')
  plt.show()

def get_psnr(history_keys):
  for metric in history_keys:
    if 'PSNR' in metric and 'val' not in metric:
      return metric

def get_psnr_val(history_keys):
  for metric in history_keys:
    if 'PSNR' in metric and 'val' in metric:
      return metric
Using TensorFlow backend.
In [ ]:
def plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model):
  # first figure: the first image of the first batch at the three ground-truth resolutions
  f1, axarr1 = plt.subplots(nrows=1, ncols=3, figsize=(20,16), squeeze=False)
  # second figure: the model's two predictions for that image
  f2, axarr2 = plt.subplots(nrows=1, ncols=2, figsize=(14,7), squeeze=False)

  axarr1[0,0].imshow(train_gen_72[0][0])
  axarr1[0,1].imshow(train_gen_144[0][0])
  axarr1[0,2].imshow(train_gen_288[0][0])

  axarr1[0,0].title.set_text('(72,72)')
  axarr1[0,1].title.set_text('(144,144)')
  axarr1[0,2].title.set_text('(288,288)')

  axarr2[0,0].imshow(preds_model[0][0])
  axarr2[0,1].imshow(preds_model[1][0])

  axarr2[0,0].title.set_text('(72,72)->(144,144)')
  axarr2[0,1].title.set_text('(72,72)->(288,288)')

Preparing the data

We will need generators to load the data; Keras's ImageDataGenerator will help here.

In [ ]:
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
# load all images in a directory
import numpy as np
from os import listdir
import cv2

train_path = "/content/files/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007"
test_path = "/content/files/VOCtest_06-Nov-2007/VOCdevkit/VOC2007"


# list the image file names in the training directory (the generators below do the actual loading)
file_names = listdir(train_path)
file_names_100 = listdir(train_path)[:100]  # only the first 100 file names, just for simplicity

X_train_dims = (72,72)
y_mid_dims = (144,144)
y_large_dims = (288,288)

batch_size = 64

epochs = 15

data_generator = ImageDataGenerator(rescale=1./255, validation_split=0.2)

train_gen_72 = data_generator.flow_from_directory(train_path + '/', classes=['JPEGImages'] ,shuffle=False, color_mode='rgb', target_size=X_train_dims, batch_size=batch_size, class_mode=None, subset='training')
train_gen_72_val = data_generator.flow_from_directory(train_path + '/', classes=['JPEGImages'] ,shuffle=False, color_mode='rgb', target_size=X_train_dims, batch_size=batch_size, class_mode=None, subset='validation')

train_gen_144 = data_generator.flow_from_directory(train_path + '/', classes=['JPEGImages'] ,shuffle=False, color_mode='rgb', target_size=y_mid_dims, batch_size=batch_size, class_mode=None, subset='training')
train_gen_144_val = data_generator.flow_from_directory(train_path + '/', classes=['JPEGImages'] ,shuffle=False, color_mode='rgb', target_size=y_mid_dims, batch_size=batch_size, class_mode=None, subset='validation')

train_gen_288 = data_generator.flow_from_directory(train_path + '/', classes=['JPEGImages'], shuffle=False, color_mode='rgb', target_size=y_large_dims, batch_size=batch_size, class_mode=None, subset='training')
train_gen_288_val = data_generator.flow_from_directory(train_path + '/', classes=['JPEGImages'], shuffle=False, color_mode='rgb', target_size=y_large_dims, batch_size=batch_size, class_mode=None, subset='validation')

def fit_generator(X, y_mid, y_large):
  while True:
    yield (X.next(), [y_mid.next(), y_large.next()])

def train_generator(train_gen, size=1):
  for sample in train_gen:
    yield [sample[i] for i in range(size)]

def reset_generators(gen):
  gen.reset()
Found 4009 images belonging to 1 classes.
Found 1002 images belonging to 1 classes.
Found 4009 images belonging to 1 classes.
Found 1002 images belonging to 1 classes.
Found 4009 images belonging to 1 classes.
Found 1002 images belonging to 1 classes.
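
Because all three generators read the same directory with shuffle=False and the same validation_split, corresponding batches stay aligned. A quick sanity check along these lines confirms the batch shapes:

In [ ]:
# optional sanity check: matching batches from the three generators should align
x72, x144, x288 = next(train_gen_72), next(train_gen_144), next(train_gen_288)
print(x72.shape, x144.shape, x288.shape)  # expected: (64, 72, 72, 3) (64, 144, 144, 3) (64, 288, 288, 3)

# rewind so training starts again from the first batch
reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)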

Models:

1. build_model: returns a basic, fully convolutional model. The input is an image of dimensions (72x72) and the output is split into two new images:

  1. an image of dimensions (144x144)
  2. an image of dimensions (288x288)

2. build_residual_model: returns a slightly more complex model built from residual blocks. The input is an image of dimensions (72x72) and the output is split into two new images:

  1. an image of dimensions (144x144)
  2. an image of dimensions (288x288)

3. build_deep_residual_model: the same model as the previous one, but with additional residual blocks, so its complexity increases. The input is an image of dimensions (72x72) and the output is split into two new images:

  1. an image of dimensions (144x144)
  2. an image of dimensions (288x288)

4. build_dilation_model: based on the previous residual models, but the residual blocks are replaced with dilation blocks that run convolutions with dilation rates of 1, 2, and 4 in parallel, concatenate the results, and then proceed as in the previous model. The input is an image of dimensions (72x72) and the output is split into two new images:

  1. an image of dimensions (144x144)
  2. an image of dimensions (288x288)

5. build_dilation_vgg16_model: uses regular convolution layers instead of dilation or residual blocks, together with a pre-trained model, in our case VGG16 trained on ImageNet. We concatenate the features from our convolutions with those of the pre-trained model and continue as in the previous models. The input is an image of dimensions (72x72) and the output is split into two new images:

  1. an image of dimensions (144x144)
  2. an image of dimensions (288x288)

All five builders share the same two-output skeleton; see the sketch after this list.
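
A minimal sketch of that shared skeleton, with a hypothetical 32-filter convolution standing in for each model's specific feature blocks (the actual builders below differ in their blocks and output activations):

In [ ]:
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D
from tensorflow.keras.models import Model

def build_skeleton():
  # shared pattern: one low-res input, one head at 2x and one at 4x the input resolution
  inp = Input(shape=(None, None, 3))
  x = Conv2D(32, 3, activation='relu', padding='same')(inp)   # stand-in for the model-specific blocks
  x = UpSampling2D()(x)
  y1 = Conv2D(3, 1, activation='sigmoid', padding='same')(x)  # (144x144) output for a (72x72) input
  x = UpSampling2D()(x)
  y2 = Conv2D(3, 1, activation='sigmoid', padding='same')(x)  # (288x288) output for a (72x72) input
  return Model(inputs=inp, outputs=[y1, y2])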
In [ ]:
from tensorflow.keras.applications.vgg16 import VGG16
model_vgg_16 = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
model_vgg_16_layer = model_vgg_16.get_layer("block1_conv2").output
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58892288/58889256 [==============================] - 0s 0us/step

Imports and helper methods

In [ ]:
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Input, add, Add, Concatenate
from tensorflow.keras.layers import LeakyReLU, Activation
from tensorflow.keras.applications.vgg16 import VGG16
from keras import backend as K




def PSNR(y_true, y_pred):
    # PSNR = 10 * log10(MAX^2 / MSE); K.log is a natural log, hence the division by ln(10) ~= 2.303.
    # The mean is taken over the channel axis only, so a perfectly reconstructed position gives
    # MSE = 0 and PSNR = inf, which is why some of the training logs below report inf.
    max_pixel = 1.0
    return (10.0 * K.log((max_pixel ** 2) / (K.mean(K.square(y_pred - y_true), axis=-1)))) / 2.303

def n_samples(gen, size):
  # yield the first `size` images of each batch from the given generator
  for x in gen:
      yield [x[i] for i in range(size)]
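
The same definition can be checked with plain NumPy. A small sketch, assuming images scaled to [0, 1]; unlike the Keras metric above, the mean here is taken over the whole image, so it only returns inf for a perfect reconstruction:

In [ ]:
import numpy as np

def psnr_np(y_true, y_pred, max_pixel=1.0):
  # PSNR = 10 * log10(MAX^2 / MSE), with the MSE averaged over the whole image
  mse = np.mean((y_true - y_pred) ** 2)
  return 10.0 * np.log10(max_pixel ** 2 / mse)

a = np.random.rand(72, 72, 3)
b = np.clip(a + np.random.normal(0, 0.01, a.shape), 0, 1)
print(psnr_np(a, b))  # roughly 40 dB for Gaussian noise with std 0.01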

Compiling the first model

Initial fully convolutional model

In [ ]:
# initial fully convolutional
def build_model():
  input1 = Input(shape=(None, None, 3))
  x = (Conv2D(64, (3, 3), activation='relu', padding='same'))(input1)
  x = (Conv2D(64, (3, 3), activation='relu', padding='same'))(x)
  x = (UpSampling2D((2,2)))(x)
  output2 = (UpSampling2D((2,2)))(x)
  output2 = (Conv2D(3, 1, activation='relu', padding='same'))(output2)
  x = (Conv2D(3, 1, activation='relu', padding='same'))(x)
  return Model(inputs=input1, outputs=[x, output2])


model = build_model()
model.compile(optimizer='adam', loss='mse', metrics=[PSNR])
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, None, None,  0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, None, None, 6 1792        input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, None, None, 6 36928       conv2d[0][0]                     
__________________________________________________________________________________________________
up_sampling2d (UpSampling2D)    (None, None, None, 6 0           conv2d_1[0][0]                   
__________________________________________________________________________________________________
up_sampling2d_1 (UpSampling2D)  (None, None, None, 6 0           up_sampling2d[0][0]              
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, None, None, 3 195         up_sampling2d[0][0]              
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, None, None, 3 195         up_sampling2d_1[0][0]            
==================================================================================================
Total params: 39,110
Trainable params: 39,110
Non-trainable params: 0
__________________________________________________________________________________________________

Fitting the first model, plotting the loss, and resetting the generators

In [ ]:
history_1 = model.fit_generator(fit_generator(train_gen_72, train_gen_144, train_gen_288),
                              steps_per_epoch=train_gen_72.samples // batch_size,
                              validation_data = fit_generator(train_gen_72_val, train_gen_144_val, train_gen_288_val),
                              validation_steps = train_gen_72_val.samples // batch_size,
                              epochs = epochs)

reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)
WARNING:tensorflow:From <ipython-input-12-e97ef603da2d>:5: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.fit, which supports generators.
Epoch 1/15
62/62 [==============================] - 58s 928ms/step - loss: 0.1065 - conv2d_3_loss: 0.0475 - conv2d_2_loss: 0.0590 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0246 - val_conv2d_3_loss: 0.0122 - val_conv2d_2_loss: 0.0124 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 2/15
62/62 [==============================] - 58s 936ms/step - loss: 0.0220 - conv2d_3_loss: 0.0110 - conv2d_2_loss: 0.0110 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0209 - val_conv2d_3_loss: 0.0104 - val_conv2d_2_loss: 0.0104 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: 29.6665
Epoch 3/15
62/62 [==============================] - 58s 938ms/step - loss: 0.0207 - conv2d_3_loss: 0.0103 - conv2d_2_loss: 0.0103 - conv2d_3_PSNR: inf - conv2d_2_PSNR: 29.9126 - val_loss: 0.0204 - val_conv2d_3_loss: 0.0102 - val_conv2d_2_loss: 0.0102 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: 30.0358
Epoch 4/15
62/62 [==============================] - 58s 936ms/step - loss: 0.0204 - conv2d_3_loss: 0.0102 - conv2d_2_loss: 0.0102 - conv2d_3_PSNR: inf - conv2d_2_PSNR: 30.1842 - val_loss: 0.0203 - val_conv2d_3_loss: 0.0101 - val_conv2d_2_loss: 0.0102 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 5/15
62/62 [==============================] - 59s 945ms/step - loss: 0.0203 - conv2d_3_loss: 0.0102 - conv2d_2_loss: 0.0102 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0202 - val_conv2d_3_loss: 0.0101 - val_conv2d_2_loss: 0.0101 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 6/15
62/62 [==============================] - 59s 946ms/step - loss: 0.0202 - conv2d_3_loss: 0.0101 - conv2d_2_loss: 0.0101 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0201 - val_conv2d_3_loss: 0.0100 - val_conv2d_2_loss: 0.0101 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 7/15
62/62 [==============================] - 59s 955ms/step - loss: 0.0202 - conv2d_3_loss: 0.0101 - conv2d_2_loss: 0.0101 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0201 - val_conv2d_3_loss: 0.0100 - val_conv2d_2_loss: 0.0101 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 8/15
62/62 [==============================] - 60s 970ms/step - loss: 0.0201 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0101 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0201 - val_conv2d_3_loss: 0.0100 - val_conv2d_2_loss: 0.0101 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 9/15
62/62 [==============================] - 60s 971ms/step - loss: 0.0201 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0101 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0200 - val_conv2d_3_loss: 0.0100 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 10/15
62/62 [==============================] - 60s 972ms/step - loss: 0.0201 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0101 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0200 - val_conv2d_3_loss: 0.0100 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 11/15
62/62 [==============================] - 60s 973ms/step - loss: 0.0200 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0100 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0200 - val_conv2d_3_loss: 0.0099 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 12/15
62/62 [==============================] - 60s 975ms/step - loss: 0.0200 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0100 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0199 - val_conv2d_3_loss: 0.0099 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 13/15
62/62 [==============================] - 61s 988ms/step - loss: 0.0201 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0101 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0199 - val_conv2d_3_loss: 0.0099 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 14/15
62/62 [==============================] - 61s 984ms/step - loss: 0.0200 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0100 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0199 - val_conv2d_3_loss: 0.0099 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
Epoch 15/15
62/62 [==============================] - 61s 986ms/step - loss: 0.0200 - conv2d_3_loss: 0.0100 - conv2d_2_loss: 0.0100 - conv2d_3_PSNR: inf - conv2d_2_PSNR: inf - val_loss: 0.0199 - val_conv2d_3_loss: 0.0099 - val_conv2d_2_loss: 0.0100 - val_conv2d_3_PSNR: inf - val_conv2d_2_PSNR: inf
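
The deprecation warning above points to Model.fit, which accepts the same generators; the equivalent non-deprecated call would look like this:

In [ ]:
# same training call with Model.fit instead of the deprecated fit_generator
history_1 = model.fit(fit_generator(train_gen_72, train_gen_144, train_gen_288),
                      steps_per_epoch=train_gen_72.samples // batch_size,
                      validation_data=fit_generator(train_gen_72_val, train_gen_144_val, train_gen_288_val),
                      validation_steps=train_gen_72_val.samples // batch_size,
                      epochs=epochs)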
In [ ]:
#plot loss metrics
plot_loss(history_1)
In [ ]:
#plot PSNR metrics
plot_PSNR(history_1)
In [ ]:
images_100 = np.asarray(next(n_samples(train_gen_72, 64)))

preds_model_1 = model.predict_generator(images_100)
WARNING:tensorflow:From <ipython-input-15-4423eb01144e>:3: Model.predict_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
Please use Model.predict, which supports generators.
In [ ]:
#plot images and predictions
plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model_1)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
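
The clipping warnings most likely come from the first model's ReLU outputs, which are not bounded to [0, 1]; clipping the predictions before plotting silences them, for example:

In [ ]:
# clip predictions into imshow's valid float range before plotting
preds_model_1_clipped = [np.clip(p, 0.0, 1.0) for p in preds_model_1]
plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model_1_clipped)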

Compiling the second model (with residual layers)

In [ ]:
def residual_block(cnl):
  inp = Input(shape=(None, None, cnl))
  x = Conv2D(cnl, 3, padding='same', activation=LeakyReLU(0.2))(inp)
  x = Conv2D(cnl, 3, padding='same', activation=LeakyReLU(0.2))(x)
  x = Add()([inp, x])
  output = Activation(LeakyReLU(0.2))(x)
  return Model(inp, output)


def build_residual_model():
  input1 = Input(shape=(None, None, 3))
  x = Conv2D(32, 3, activation=LeakyReLU(0.2), padding='same')(input1)
  x = residual_block(32)(x)
  x = residual_block(32)(x)
  x = UpSampling2D()(x)
  y = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  x = residual_block(32)(x)
  x = UpSampling2D()(x)
  y2 = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  return Model(inputs=input1, outputs=[y, y2])

# model with residual blocks
model = build_residual_model()
model.compile(optimizer='adam', loss='mse', metrics=[PSNR])
model.summary()
Model: "model_45"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_51 (InputLayer)           [(None, None, None,  0                                            
__________________________________________________________________________________________________
conv2d_171 (Conv2D)             (None, None, None, 3 896         input_51[0][0]                   
__________________________________________________________________________________________________
model_42 (Model)                (None, None, None, 3 18496       conv2d_171[0][0]                 
__________________________________________________________________________________________________
model_43 (Model)                (None, None, None, 3 18496       model_42[1][0]                   
__________________________________________________________________________________________________
up_sampling2d_43 (UpSampling2D) (None, None, None, 3 0           model_43[1][0]                   
__________________________________________________________________________________________________
model_44 (Model)                (None, None, None, 3 18496       up_sampling2d_43[0][0]           
__________________________________________________________________________________________________
up_sampling2d_44 (UpSampling2D) (None, None, None, 3 0           model_44[1][0]                   
__________________________________________________________________________________________________
conv2d_176 (Conv2D)             (None, None, None, 3 99          up_sampling2d_43[0][0]           
__________________________________________________________________________________________________
conv2d_179 (Conv2D)             (None, None, None, 3 99          up_sampling2d_44[0][0]           
==================================================================================================
Total params: 56,582
Trainable params: 56,582
Non-trainable params: 0
__________________________________________________________________________________________________

Fitting the second model, plotting the loss, and resetting the generators

In [ ]:
reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)

history_2_residual = model.fit_generator(fit_generator(train_gen_72, train_gen_144, train_gen_288),
                              steps_per_epoch=train_gen_72.samples // batch_size,
                              validation_data = fit_generator(train_gen_72_val, train_gen_144_val, train_gen_288_val),
                              validation_steps = train_gen_72_val.samples // batch_size,
                              epochs = epochs)

reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)
Epoch 1/15
62/62 [==============================] - 75s 1s/step - loss: 0.0538 - conv2d_176_loss: 0.0284 - conv2d_179_loss: 0.0254 - conv2d_176_PSNR: 20.5791 - conv2d_179_PSNR: 21.2466 - val_loss: 0.0263 - val_conv2d_176_loss: 0.0135 - val_conv2d_179_loss: 0.0128 - val_conv2d_176_PSNR: 24.6935 - val_conv2d_179_PSNR: 25.4509
Epoch 2/15
62/62 [==============================] - 76s 1s/step - loss: 0.0245 - conv2d_176_loss: 0.0126 - conv2d_179_loss: 0.0119 - conv2d_176_PSNR: 25.7028 - conv2d_179_PSNR: 26.2487 - val_loss: 0.0234 - val_conv2d_176_loss: 0.0120 - val_conv2d_179_loss: 0.0114 - val_conv2d_176_PSNR: 26.0880 - val_conv2d_179_PSNR: 26.6059
Epoch 3/15
62/62 [==============================] - 75s 1s/step - loss: 0.0227 - conv2d_176_loss: 0.0117 - conv2d_179_loss: 0.0110 - conv2d_176_PSNR: 26.5548 - conv2d_179_PSNR: 26.8845 - val_loss: 0.0218 - val_conv2d_176_loss: 0.0113 - val_conv2d_179_loss: 0.0105 - val_conv2d_176_PSNR: 27.1622 - val_conv2d_179_PSNR: 27.7341
Epoch 4/15
62/62 [==============================] - 75s 1s/step - loss: 0.0220 - conv2d_176_loss: 0.0113 - conv2d_179_loss: 0.0107 - conv2d_176_PSNR: 27.0561 - conv2d_179_PSNR: 27.4048 - val_loss: 0.0213 - val_conv2d_176_loss: 0.0110 - val_conv2d_179_loss: 0.0103 - val_conv2d_176_PSNR: 27.4983 - val_conv2d_179_PSNR: 28.0019
Epoch 5/15
62/62 [==============================] - 75s 1s/step - loss: 0.0212 - conv2d_176_loss: 0.0109 - conv2d_179_loss: 0.0103 - conv2d_176_PSNR: 27.8602 - conv2d_179_PSNR: 28.1962 - val_loss: 0.0211 - val_conv2d_176_loss: 0.0108 - val_conv2d_179_loss: 0.0104 - val_conv2d_176_PSNR: 28.0302 - val_conv2d_179_PSNR: 27.6925
Epoch 6/15
62/62 [==============================] - 75s 1s/step - loss: 0.0208 - conv2d_176_loss: 0.0107 - conv2d_179_loss: 0.0101 - conv2d_176_PSNR: 28.3052 - conv2d_179_PSNR: 28.6573 - val_loss: 0.0206 - val_conv2d_176_loss: 0.0106 - val_conv2d_179_loss: 0.0100 - val_conv2d_176_PSNR: 28.4115 - val_conv2d_179_PSNR: 28.8138
Epoch 7/15
62/62 [==============================] - 75s 1s/step - loss: 0.0207 - conv2d_176_loss: 0.0106 - conv2d_179_loss: 0.0101 - conv2d_176_PSNR: 28.4183 - conv2d_179_PSNR: 28.6603 - val_loss: 0.0204 - val_conv2d_176_loss: 0.0105 - val_conv2d_179_loss: 0.0099 - val_conv2d_176_PSNR: 28.7353 - val_conv2d_179_PSNR: 29.0583
Epoch 8/15
62/62 [==============================] - 75s 1s/step - loss: 0.0209 - conv2d_176_loss: 0.0107 - conv2d_179_loss: 0.0102 - conv2d_176_PSNR: 28.2115 - conv2d_179_PSNR: 28.3336 - val_loss: 0.0204 - val_conv2d_176_loss: 0.0105 - val_conv2d_179_loss: 0.0099 - val_conv2d_176_PSNR: 28.6433 - val_conv2d_179_PSNR: 28.9758
Epoch 9/15
62/62 [==============================] - 75s 1s/step - loss: 0.0203 - conv2d_176_loss: 0.0104 - conv2d_179_loss: 0.0098 - conv2d_176_PSNR: 28.9193 - conv2d_179_PSNR: 29.4498 - val_loss: 0.0202 - val_conv2d_176_loss: 0.0104 - val_conv2d_179_loss: 0.0098 - val_conv2d_176_PSNR: 28.9167 - val_conv2d_179_PSNR: 29.5266
Epoch 10/15
62/62 [==============================] - 75s 1s/step - loss: 0.0205 - conv2d_176_loss: 0.0105 - conv2d_179_loss: 0.0100 - conv2d_176_PSNR: 28.7499 - conv2d_179_PSNR: 28.9855 - val_loss: 0.0205 - val_conv2d_176_loss: 0.0105 - val_conv2d_179_loss: 0.0100 - val_conv2d_176_PSNR: 28.5180 - val_conv2d_179_PSNR: 28.3750
Epoch 11/15
62/62 [==============================] - 75s 1s/step - loss: 0.0201 - conv2d_176_loss: 0.0104 - conv2d_179_loss: 0.0098 - conv2d_176_PSNR: 29.1302 - conv2d_179_PSNR: 29.6311 - val_loss: 0.0200 - val_conv2d_176_loss: 0.0103 - val_conv2d_179_loss: 0.0097 - val_conv2d_176_PSNR: 29.1525 - val_conv2d_179_PSNR: 29.7479
Epoch 12/15
62/62 [==============================] - 76s 1s/step - loss: 0.0203 - conv2d_176_loss: 0.0104 - conv2d_179_loss: 0.0099 - conv2d_176_PSNR: 28.9523 - conv2d_179_PSNR: 29.1820 - val_loss: 0.0215 - val_conv2d_176_loss: 0.0108 - val_conv2d_179_loss: 0.0107 - val_conv2d_176_PSNR: 27.6997 - val_conv2d_179_PSNR: 26.8700
Epoch 13/15
62/62 [==============================] - 75s 1s/step - loss: 0.0202 - conv2d_176_loss: 0.0104 - conv2d_179_loss: 0.0098 - conv2d_176_PSNR: 29.0878 - conv2d_179_PSNR: 29.3739 - val_loss: 0.0199 - val_conv2d_176_loss: 0.0103 - val_conv2d_179_loss: 0.0097 - val_conv2d_176_PSNR: 29.3569 - val_conv2d_179_PSNR: 29.9195
Epoch 14/15
62/62 [==============================] - 75s 1s/step - loss: 0.0200 - conv2d_176_loss: 0.0103 - conv2d_179_loss: 0.0097 - conv2d_176_PSNR: 29.3852 - conv2d_179_PSNR: 29.8518 - val_loss: 0.0199 - val_conv2d_176_loss: 0.0102 - val_conv2d_179_loss: 0.0097 - val_conv2d_176_PSNR: 29.4480 - val_conv2d_179_PSNR: 29.8486
Epoch 15/15
62/62 [==============================] - 76s 1s/step - loss: 0.0200 - conv2d_176_loss: 0.0103 - conv2d_179_loss: 0.0097 - conv2d_176_PSNR: 29.2854 - conv2d_179_PSNR: 29.5980 - val_loss: 0.0201 - val_conv2d_176_loss: 0.0103 - val_conv2d_179_loss: 0.0098 - val_conv2d_176_PSNR: 28.8233 - val_conv2d_179_PSNR: 28.7735
In [ ]:
#plot loss metrics
plot_loss(history_2_residual)
In [ ]:
#plot PSNR metrics
plot_PSNR(history_2_residual)
In [ ]:
images_100 = np.asarray(next(n_samples(train_gen_72, 64)))

preds_model_2 = model.predict_generator(images_100)
In [ ]:
plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model_2)
In [ ]:
def build_deep_residual_model():
  input1 = Input(shape=(None, None, 3))
  x = Conv2D(32, 3, activation=LeakyReLU(0.2), padding='same')(input1)
  x = residual_block(32)(x)
  x = residual_block(32)(x)
  x = residual_block(32)(x)
  x = residual_block(32)(x)
  x = residual_block(32)(x)
  x = residual_block(32)(x)
  x = UpSampling2D()(x)
  y = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  x = residual_block(32)(x)
  x = UpSampling2D()(x)
  y2 = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  return Model(inputs=input1, outputs=[y, y2])

model = build_deep_residual_model()
model.compile(optimizer='adam', loss='mse', metrics=[PSNR])
model.summary()
Model: "model_53"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_55 (InputLayer)           [(None, None, None,  0                                            
__________________________________________________________________________________________________
conv2d_180 (Conv2D)             (None, None, None, 3 896         input_55[0][0]                   
__________________________________________________________________________________________________
model_46 (Model)                (None, None, None, 3 18496       conv2d_180[0][0]                 
__________________________________________________________________________________________________
model_47 (Model)                (None, None, None, 3 18496       model_46[1][0]                   
__________________________________________________________________________________________________
model_48 (Model)                (None, None, None, 3 18496       model_47[1][0]                   
__________________________________________________________________________________________________
model_49 (Model)                (None, None, None, 3 18496       model_48[1][0]                   
__________________________________________________________________________________________________
model_50 (Model)                (None, None, None, 3 18496       model_49[1][0]                   
__________________________________________________________________________________________________
model_51 (Model)                (None, None, None, 3 18496       model_50[1][0]                   
__________________________________________________________________________________________________
up_sampling2d_45 (UpSampling2D) (None, None, None, 3 0           model_51[1][0]                   
__________________________________________________________________________________________________
model_52 (Model)                (None, None, None, 3 18496       up_sampling2d_45[0][0]           
__________________________________________________________________________________________________
up_sampling2d_46 (UpSampling2D) (None, None, None, 3 0           model_52[1][0]                   
__________________________________________________________________________________________________
conv2d_193 (Conv2D)             (None, None, None, 3 99          up_sampling2d_45[0][0]           
__________________________________________________________________________________________________
conv2d_196 (Conv2D)             (None, None, None, 3 99          up_sampling2d_46[0][0]           
==================================================================================================
Total params: 130,566
Trainable params: 130,566
Non-trainable params: 0
__________________________________________________________________________________________________
In [ ]:
reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)

history_3_residual_deeper = model.fit_generator(fit_generator(train_gen_72, train_gen_144, train_gen_288),
                              steps_per_epoch=train_gen_72.samples // batch_size,
                              validation_data = fit_generator(train_gen_72_val, train_gen_144_val, train_gen_288_val),
                              validation_steps = train_gen_72_val.samples // batch_size,
                              epochs = epochs)

reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)
Epoch 1/15
62/62 [==============================] - 79s 1s/step - loss: 0.0615 - conv2d_193_loss: 0.0330 - conv2d_196_loss: 0.0285 - conv2d_193_PSNR: 20.0249 - conv2d_196_PSNR: 20.8726 - val_loss: 0.0262 - val_conv2d_193_loss: 0.0136 - val_conv2d_196_loss: 0.0126 - val_conv2d_193_PSNR: 24.7811 - val_conv2d_196_PSNR: 25.1942
Epoch 2/15
62/62 [==============================] - 79s 1s/step - loss: 0.0243 - conv2d_193_loss: 0.0125 - conv2d_196_loss: 0.0118 - conv2d_193_PSNR: 25.7717 - conv2d_196_PSNR: 25.8927 - val_loss: 0.0228 - val_conv2d_193_loss: 0.0117 - val_conv2d_196_loss: 0.0110 - val_conv2d_193_PSNR: 26.4592 - val_conv2d_196_PSNR: 26.6895
Epoch 3/15
62/62 [==============================] - 78s 1s/step - loss: 0.0228 - conv2d_193_loss: 0.0117 - conv2d_196_loss: 0.0112 - conv2d_193_PSNR: 26.6323 - conv2d_196_PSNR: 26.6769 - val_loss: 0.0220 - val_conv2d_193_loss: 0.0113 - val_conv2d_196_loss: 0.0107 - val_conv2d_193_PSNR: 26.9156 - val_conv2d_196_PSNR: 27.2118
Epoch 4/15
62/62 [==============================] - 79s 1s/step - loss: 0.0213 - conv2d_193_loss: 0.0109 - conv2d_196_loss: 0.0104 - conv2d_193_PSNR: 27.8746 - conv2d_196_PSNR: 28.1159 - val_loss: 0.0210 - val_conv2d_193_loss: 0.0108 - val_conv2d_196_loss: 0.0102 - val_conv2d_193_PSNR: 28.0377 - val_conv2d_196_PSNR: 28.2100
Epoch 5/15
62/62 [==============================] - 79s 1s/step - loss: 0.0211 - conv2d_193_loss: 0.0108 - conv2d_196_loss: 0.0103 - conv2d_193_PSNR: 28.0507 - conv2d_196_PSNR: 28.1980 - val_loss: 0.0206 - val_conv2d_193_loss: 0.0106 - val_conv2d_196_loss: 0.0100 - val_conv2d_193_PSNR: 28.6094 - val_conv2d_196_PSNR: 28.9667
Epoch 6/15
62/62 [==============================] - 79s 1s/step - loss: 0.0206 - conv2d_193_loss: 0.0106 - conv2d_196_loss: 0.0100 - conv2d_193_PSNR: 28.5158 - conv2d_196_PSNR: 28.7252 - val_loss: 0.0204 - val_conv2d_193_loss: 0.0105 - val_conv2d_196_loss: 0.0099 - val_conv2d_193_PSNR: 28.6866 - val_conv2d_196_PSNR: 28.9823
Epoch 7/15
62/62 [==============================] - 79s 1s/step - loss: 0.0206 - conv2d_193_loss: 0.0106 - conv2d_196_loss: 0.0101 - conv2d_193_PSNR: 28.5070 - conv2d_196_PSNR: 28.6196 - val_loss: 0.0204 - val_conv2d_193_loss: 0.0105 - val_conv2d_196_loss: 0.0099 - val_conv2d_193_PSNR: 28.5755 - val_conv2d_196_PSNR: 28.9963
Epoch 8/15
62/62 [==============================] - 79s 1s/step - loss: 0.0205 - conv2d_193_loss: 0.0105 - conv2d_196_loss: 0.0100 - conv2d_193_PSNR: 28.6006 - conv2d_196_PSNR: 28.6824 - val_loss: 0.0202 - val_conv2d_193_loss: 0.0104 - val_conv2d_196_loss: 0.0098 - val_conv2d_193_PSNR: 28.8814 - val_conv2d_196_PSNR: 28.9283
Epoch 9/15
62/62 [==============================] - 78s 1s/step - loss: 0.0202 - conv2d_193_loss: 0.0104 - conv2d_196_loss: 0.0098 - conv2d_193_PSNR: 29.1203 - conv2d_196_PSNR: 29.3209 - val_loss: 0.0201 - val_conv2d_193_loss: 0.0103 - val_conv2d_196_loss: 0.0098 - val_conv2d_193_PSNR: 28.9525 - val_conv2d_196_PSNR: 29.0237
Epoch 10/15
62/62 [==============================] - 79s 1s/step - loss: 0.0202 - conv2d_193_loss: 0.0103 - conv2d_196_loss: 0.0098 - conv2d_193_PSNR: 29.0497 - conv2d_196_PSNR: 29.1745 - val_loss: 0.0201 - val_conv2d_193_loss: 0.0103 - val_conv2d_196_loss: 0.0098 - val_conv2d_193_PSNR: 28.9357 - val_conv2d_196_PSNR: 28.8160
Epoch 11/15
62/62 [==============================] - 79s 1s/step - loss: 0.0202 - conv2d_193_loss: 0.0103 - conv2d_196_loss: 0.0098 - conv2d_193_PSNR: 29.0353 - conv2d_196_PSNR: 29.1876 - val_loss: 0.0199 - val_conv2d_193_loss: 0.0102 - val_conv2d_196_loss: 0.0097 - val_conv2d_193_PSNR: 29.4092 - val_conv2d_196_PSNR: 29.5234
Epoch 12/15
62/62 [==============================] - 79s 1s/step - loss: 0.0200 - conv2d_193_loss: 0.0103 - conv2d_196_loss: 0.0097 - conv2d_193_PSNR: 29.2465 - conv2d_196_PSNR: 29.4024 - val_loss: 0.0202 - val_conv2d_193_loss: 0.0103 - val_conv2d_196_loss: 0.0099 - val_conv2d_193_PSNR: 28.6880 - val_conv2d_196_PSNR: 28.6626
Epoch 13/15
62/62 [==============================] - 78s 1s/step - loss: 0.0199 - conv2d_193_loss: 0.0102 - conv2d_196_loss: 0.0097 - conv2d_193_PSNR: 29.4127 - conv2d_196_PSNR: 29.5777 - val_loss: 0.0198 - val_conv2d_193_loss: 0.0102 - val_conv2d_196_loss: 0.0096 - val_conv2d_193_PSNR: 29.5538 - val_conv2d_196_PSNR: 29.7990
Epoch 14/15
62/62 [==============================] - 79s 1s/step - loss: 0.0199 - conv2d_193_loss: 0.0102 - conv2d_196_loss: 0.0097 - conv2d_193_PSNR: 29.4488 - conv2d_196_PSNR: 29.6094 - val_loss: 0.0197 - val_conv2d_193_loss: 0.0101 - val_conv2d_196_loss: 0.0096 - val_conv2d_193_PSNR: 29.7161 - val_conv2d_196_PSNR: 29.8326
Epoch 15/15
62/62 [==============================] - 78s 1s/step - loss: 0.0198 - conv2d_193_loss: 0.0102 - conv2d_196_loss: 0.0096 - conv2d_193_PSNR: 29.5133 - conv2d_196_PSNR: 29.6509 - val_loss: 0.0202 - val_conv2d_193_loss: 0.0104 - val_conv2d_196_loss: 0.0099 - val_conv2d_193_PSNR: 28.4164 - val_conv2d_196_PSNR: 28.5239
In [ ]:
images_100 = np.asarray(next(n_samples(train_gen_72, 64)))

# type(train_gen_72)
preds_model_3 = model.predict_generator(images_100)
In [ ]:
#plot loss metrics
plot_loss(history_3_residual_deeper)
In [ ]:
#plot PSNR metrics
plot_PSNR(history_3_residual_deeper)
In [ ]:
plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model_3)
In [ ]:
def dilation_block(cnl):
  inp = Input(shape=(None, None, cnl))
  y1 = Conv2D(cnl, 3, padding='same', dilation_rate=(1,1), activation=LeakyReLU(0.2))(inp)
  y2 = Conv2D(cnl, 3, padding='same', dilation_rate=(2,2), activation=LeakyReLU(0.2))(inp)
  y4 = Conv2D(cnl, 3, padding='same', dilation_rate=(4,4), activation=LeakyReLU(0.2))(inp)
  x = Concatenate()([y1, y2, y4])
  x = Activation(LeakyReLU(0.2))(x)
  output = Conv2D(cnl, 3, padding='same', activation=LeakyReLU(0.2))(x)
  return Model(inp, output)

def build_dilation_model():
  input1 = Input(shape=(None, None, 3))
  x = Conv2D(32, 3, activation=LeakyReLU(0.2), padding='same')(input1)
  x = dilation_block(32)(x)
  x = dilation_block(32)(x)
  x = UpSampling2D()(x)
  y = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  x = dilation_block(32)(x)
  x = UpSampling2D()(x)
  y2 = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  return Model(inputs=input1, outputs=[y, y2])

model = build_dilation_model()
model.compile(optimizer='adam', loss='mse', metrics=[PSNR])
model.summary()
Model: "model_57"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_63 (InputLayer)           [(None, None, None,  0                                            
__________________________________________________________________________________________________
conv2d_197 (Conv2D)             (None, None, None, 3 896         input_63[0][0]                   
__________________________________________________________________________________________________
model_54 (Model)                (None, None, None, 3 55424       conv2d_197[0][0]                 
__________________________________________________________________________________________________
model_55 (Model)                (None, None, None, 3 55424       model_54[1][0]                   
__________________________________________________________________________________________________
up_sampling2d_47 (UpSampling2D) (None, None, None, 3 0           model_55[1][0]                   
__________________________________________________________________________________________________
model_56 (Model)                (None, None, None, 3 55424       up_sampling2d_47[0][0]           
__________________________________________________________________________________________________
up_sampling2d_48 (UpSampling2D) (None, None, None, 3 0           model_56[1][0]                   
__________________________________________________________________________________________________
conv2d_206 (Conv2D)             (None, None, None, 3 99          up_sampling2d_47[0][0]           
__________________________________________________________________________________________________
conv2d_211 (Conv2D)             (None, None, None, 3 99          up_sampling2d_48[0][0]           
==================================================================================================
Total params: 167,366
Trainable params: 167,366
Non-trainable params: 0
__________________________________________________________________________________________________
In [ ]:
reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)

history_5_residual = model.fit_generator(fit_generator(train_gen_72, train_gen_144, train_gen_288),
                              steps_per_epoch=train_gen_72.samples // batch_size,
                              validation_data = fit_generator(train_gen_72_val, train_gen_144_val, train_gen_288_val),
                              validation_steps = train_gen_72_val.samples // batch_size,
                              epochs = epochs)

reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)
Epoch 1/15
62/62 [==============================] - 88s 1s/step - loss: 0.0618 - conv2d_206_loss: 0.0319 - conv2d_211_loss: 0.0299 - conv2d_206_PSNR: 20.0833 - conv2d_211_PSNR: 20.5679 - val_loss: 0.0272 - val_conv2d_206_loss: 0.0139 - val_conv2d_211_loss: 0.0133 - val_conv2d_206_PSNR: 24.7092 - val_conv2d_211_PSNR: 25.1529
Epoch 2/15
62/62 [==============================] - 88s 1s/step - loss: 0.0254 - conv2d_206_loss: 0.0129 - conv2d_211_loss: 0.0124 - conv2d_206_PSNR: 25.4562 - conv2d_211_PSNR: 25.7373 - val_loss: 0.0235 - val_conv2d_206_loss: 0.0120 - val_conv2d_211_loss: 0.0115 - val_conv2d_206_PSNR: 26.3834 - val_conv2d_211_PSNR: 26.5794
Epoch 3/15
62/62 [==============================] - 88s 1s/step - loss: 0.0227 - conv2d_206_loss: 0.0116 - conv2d_211_loss: 0.0111 - conv2d_206_PSNR: 26.8054 - conv2d_211_PSNR: 27.1361 - val_loss: 0.0218 - val_conv2d_206_loss: 0.0112 - val_conv2d_211_loss: 0.0106 - val_conv2d_206_PSNR: 27.4537 - val_conv2d_211_PSNR: 27.9238
Epoch 4/15
62/62 [==============================] - 87s 1s/step - loss: 0.0217 - conv2d_206_loss: 0.0111 - conv2d_211_loss: 0.0106 - conv2d_206_PSNR: 27.4828 - conv2d_211_PSNR: 27.7922 - val_loss: 0.0214 - val_conv2d_206_loss: 0.0109 - val_conv2d_211_loss: 0.0104 - val_conv2d_206_PSNR: 27.6829 - val_conv2d_211_PSNR: 28.0430
Epoch 5/15
62/62 [==============================] - 87s 1s/step - loss: 0.0216 - conv2d_206_loss: 0.0110 - conv2d_211_loss: 0.0106 - conv2d_206_PSNR: 27.5891 - conv2d_211_PSNR: 27.8251 - val_loss: 0.0209 - val_conv2d_206_loss: 0.0107 - val_conv2d_211_loss: 0.0102 - val_conv2d_206_PSNR: 28.0908 - val_conv2d_211_PSNR: 28.6591
Epoch 6/15
62/62 [==============================] - 88s 1s/step - loss: 0.0209 - conv2d_206_loss: 0.0107 - conv2d_211_loss: 0.0102 - conv2d_206_PSNR: 28.0715 - conv2d_211_PSNR: 28.4888 - val_loss: 0.0207 - val_conv2d_206_loss: 0.0106 - val_conv2d_211_loss: 0.0101 - val_conv2d_206_PSNR: 28.2828 - val_conv2d_211_PSNR: 28.7851
Epoch 7/15
62/62 [==============================] - 88s 1s/step - loss: 0.0206 - conv2d_206_loss: 0.0106 - conv2d_211_loss: 0.0100 - conv2d_206_PSNR: 28.4538 - conv2d_211_PSNR: 28.9708 - val_loss: 0.0208 - val_conv2d_206_loss: 0.0106 - val_conv2d_211_loss: 0.0102 - val_conv2d_206_PSNR: 28.1375 - val_conv2d_211_PSNR: 28.1399
Epoch 8/15
62/62 [==============================] - 87s 1s/step - loss: 0.0206 - conv2d_206_loss: 0.0106 - conv2d_211_loss: 0.0100 - conv2d_206_PSNR: 28.3622 - conv2d_211_PSNR: 28.6796 - val_loss: 0.0203 - val_conv2d_206_loss: 0.0104 - val_conv2d_211_loss: 0.0099 - val_conv2d_206_PSNR: 28.7285 - val_conv2d_211_PSNR: 29.3340
Epoch 9/15
62/62 [==============================] - 87s 1s/step - loss: 0.0205 - conv2d_206_loss: 0.0105 - conv2d_211_loss: 0.0100 - conv2d_206_PSNR: 28.5007 - conv2d_211_PSNR: 28.7598 - val_loss: 0.0202 - val_conv2d_206_loss: 0.0104 - val_conv2d_211_loss: 0.0098 - val_conv2d_206_PSNR: 28.7182 - val_conv2d_211_PSNR: 29.1760
Epoch 10/15
62/62 [==============================] - 87s 1s/step - loss: 0.0204 - conv2d_206_loss: 0.0105 - conv2d_211_loss: 0.0099 - conv2d_206_PSNR: 28.6815 - conv2d_211_PSNR: 29.0251 - val_loss: 0.0201 - val_conv2d_206_loss: 0.0104 - val_conv2d_211_loss: 0.0098 - val_conv2d_206_PSNR: 28.8547 - val_conv2d_211_PSNR: 29.3935
Epoch 11/15
62/62 [==============================] - 87s 1s/step - loss: 0.0201 - conv2d_206_loss: 0.0104 - conv2d_211_loss: 0.0098 - conv2d_206_PSNR: 28.9428 - conv2d_211_PSNR: 29.3907 - val_loss: 0.0201 - val_conv2d_206_loss: 0.0103 - val_conv2d_211_loss: 0.0097 - val_conv2d_206_PSNR: 28.8839 - val_conv2d_211_PSNR: 29.3221
Epoch 12/15
62/62 [==============================] - 87s 1s/step - loss: 0.0201 - conv2d_206_loss: 0.0103 - conv2d_211_loss: 0.0098 - conv2d_206_PSNR: 28.9075 - conv2d_211_PSNR: 29.2313 - val_loss: 0.0199 - val_conv2d_206_loss: 0.0103 - val_conv2d_211_loss: 0.0097 - val_conv2d_206_PSNR: 29.2023 - val_conv2d_211_PSNR: 29.7964
Epoch 13/15
62/62 [==============================] - 87s 1s/step - loss: 0.0201 - conv2d_206_loss: 0.0103 - conv2d_211_loss: 0.0098 - conv2d_206_PSNR: 28.9858 - conv2d_211_PSNR: 29.3422 - val_loss: 0.0199 - val_conv2d_206_loss: 0.0102 - val_conv2d_211_loss: 0.0096 - val_conv2d_206_PSNR: 29.1553 - val_conv2d_211_PSNR: 29.6538
Epoch 14/15
62/62 [==============================] - 87s 1s/step - loss: 0.0200 - conv2d_206_loss: 0.0103 - conv2d_211_loss: 0.0097 - conv2d_206_PSNR: 29.0130 - conv2d_211_PSNR: 29.3350 - val_loss: 0.0198 - val_conv2d_206_loss: 0.0102 - val_conv2d_211_loss: 0.0096 - val_conv2d_206_PSNR: 29.3230 - val_conv2d_211_PSNR: 29.7847
Epoch 15/15
62/62 [==============================] - 87s 1s/step - loss: 0.0199 - conv2d_206_loss: 0.0103 - conv2d_211_loss: 0.0097 - conv2d_206_PSNR: 29.1055 - conv2d_211_PSNR: 29.3796 - val_loss: 0.0199 - val_conv2d_206_loss: 0.0102 - val_conv2d_211_loss: 0.0096 - val_conv2d_206_PSNR: 29.1727 - val_conv2d_211_PSNR: 29.4573
In [ ]:
#plot loss metrics
plot_loss(history_5_residual)
In [ ]:
#plot PSNR metrics
plot_PSNR(history_5_residual)
In [ ]:
images_100 = np.asarray(next(n_samples(train_gen_72, 64)))

# type(train_gen_72)
preds_model_5 = model.predict_generator(images_100)
In [ ]:
plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model_5)
In [19]:
def build_dilation_vgg16_model():
  input1 = Input(shape=(None, None, 3))
  model_vgg_16 = VGG16(include_top=False, weights='imagenet', input_tensor=input1)
  model_vgg_16_layer = model_vgg_16.get_layer('block1_conv2').output
  x = Conv2D(64, 3, activation=LeakyReLU(0.2), padding='same')(input1)
  x = Conv2D(64, 3, activation=LeakyReLU(0.2), padding='same')(x)
  # vgg_model = model_vgg_16(input1)
  # x = dilation_block(32)(x)
  # x = dilation_block(32)(x)
  x = Concatenate()([x, model_vgg_16_layer])
  x = UpSampling2D()(x)
  y = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  # x = dilation_block(32)(x)
  x = UpSampling2D()(x)
  y2 = Conv2D(3, 1, activation='sigmoid', padding='same')(x)
  return Model(inputs=input1, outputs=[y, y2])

model = build_dilation_vgg16_model()
model.compile(optimizer='adam', loss='mse', metrics=[PSNR])
model.summary()
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58892288/58889256 [==============================] - 0s 0us/step
Model: "model_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_2 (InputLayer)            [(None, None, None,  0                                            
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, None, None, 6 1792        input_2[0][0]                    
__________________________________________________________________________________________________
block1_conv1 (Conv2D)           (None, None, None, 6 1792        input_2[0][0]                    
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, None, None, 6 36928       conv2d_4[0][0]                   
__________________________________________________________________________________________________
block1_conv2 (Conv2D)           (None, None, None, 6 36928       block1_conv1[0][0]               
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, None, None, 1 0           conv2d_5[0][0]                   
                                                                 block1_conv2[0][0]               
__________________________________________________________________________________________________
up_sampling2d_2 (UpSampling2D)  (None, None, None, 1 0           concatenate[0][0]                
__________________________________________________________________________________________________
up_sampling2d_3 (UpSampling2D)  (None, None, None, 1 0           up_sampling2d_2[0][0]            
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, None, None, 3 387         up_sampling2d_2[0][0]            
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, None, None, 3 387         up_sampling2d_3[0][0]            
==================================================================================================
Total params: 78,214
Trainable params: 78,214
Non-trainable params: 0
__________________________________________________________________________________________________
In [20]:
reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)

history_6_residual = model.fit_generator(fit_generator(train_gen_72, train_gen_144, train_gen_288),
                              steps_per_epoch=train_gen_72.samples // batch_size,
                              validation_data = fit_generator(train_gen_72_val, train_gen_144_val, train_gen_288_val),
                              validation_steps = train_gen_72_val.samples // batch_size,
                              epochs = epochs)

reset_generators(train_gen_72)
reset_generators(train_gen_144)
reset_generators(train_gen_288)
reset_generators(train_gen_72_val)
reset_generators(train_gen_144_val)
reset_generators(train_gen_288_val)
Epoch 1/15
62/62 [==============================] - 62s 1s/step - loss: 0.0846 - conv2d_6_loss: 0.0424 - conv2d_7_loss: 0.0421 - conv2d_6_PSNR: 18.9698 - conv2d_7_PSNR: 19.3919 - val_loss: 0.0312 - val_conv2d_6_loss: 0.0158 - val_conv2d_7_loss: 0.0154 - val_conv2d_6_PSNR: 23.6014 - val_conv2d_7_PSNR: 23.7572
Epoch 2/15
62/62 [==============================] - 63s 1s/step - loss: 0.0272 - conv2d_6_loss: 0.0136 - conv2d_7_loss: 0.0136 - conv2d_6_PSNR: 24.8766 - conv2d_7_PSNR: 24.7401 - val_loss: 0.0248 - val_conv2d_6_loss: 0.0123 - val_conv2d_7_loss: 0.0125 - val_conv2d_6_PSNR: 25.9340 - val_conv2d_7_PSNR: 25.6004
Epoch 3/15
62/62 [==============================] - 63s 1s/step - loss: 0.0239 - conv2d_6_loss: 0.0119 - conv2d_7_loss: 0.0120 - conv2d_6_PSNR: 26.3690 - conv2d_7_PSNR: 26.1601 - val_loss: 0.0232 - val_conv2d_6_loss: 0.0116 - val_conv2d_7_loss: 0.0116 - val_conv2d_6_PSNR: 26.7787 - val_conv2d_7_PSNR: 26.6167
Epoch 4/15
62/62 [==============================] - 64s 1s/step - loss: 0.0227 - conv2d_6_loss: 0.0113 - conv2d_7_loss: 0.0114 - conv2d_6_PSNR: 27.0425 - conv2d_7_PSNR: 26.9201 - val_loss: 0.0223 - val_conv2d_6_loss: 0.0111 - val_conv2d_7_loss: 0.0112 - val_conv2d_6_PSNR: 27.3099 - val_conv2d_7_PSNR: 27.1211
Epoch 5/15
62/62 [==============================] - 64s 1s/step - loss: 0.0222 - conv2d_6_loss: 0.0111 - conv2d_7_loss: 0.0111 - conv2d_6_PSNR: 27.4400 - conv2d_7_PSNR: 27.2772 - val_loss: 0.0220 - val_conv2d_6_loss: 0.0109 - val_conv2d_7_loss: 0.0110 - val_conv2d_6_PSNR: 27.5416 - val_conv2d_7_PSNR: 27.3449
Epoch 6/15
62/62 [==============================] - 64s 1s/step - loss: 0.0218 - conv2d_6_loss: 0.0109 - conv2d_7_loss: 0.0109 - conv2d_6_PSNR: 27.7237 - conv2d_7_PSNR: 27.5348 - val_loss: 0.0218 - val_conv2d_6_loss: 0.0109 - val_conv2d_7_loss: 0.0110 - val_conv2d_6_PSNR: 27.5110 - val_conv2d_7_PSNR: 27.2949
Epoch 7/15
62/62 [==============================] - 64s 1s/step - loss: 0.0216 - conv2d_6_loss: 0.0108 - conv2d_7_loss: 0.0109 - conv2d_6_PSNR: 27.8984 - conv2d_7_PSNR: 27.6964 - val_loss: 0.0214 - val_conv2d_6_loss: 0.0106 - val_conv2d_7_loss: 0.0107 - val_conv2d_6_PSNR: 28.0929 - val_conv2d_7_PSNR: 27.9297
Epoch 8/15
62/62 [==============================] - 65s 1s/step - loss: 0.0214 - conv2d_6_loss: 0.0107 - conv2d_7_loss: 0.0108 - conv2d_6_PSNR: 28.0365 - conv2d_7_PSNR: 27.8390 - val_loss: 0.0212 - val_conv2d_6_loss: 0.0106 - val_conv2d_7_loss: 0.0107 - val_conv2d_6_PSNR: 28.1953 - val_conv2d_7_PSNR: 28.1246
Epoch 9/15
62/62 [==============================] - 64s 1s/step - loss: 0.0212 - conv2d_6_loss: 0.0106 - conv2d_7_loss: 0.0106 - conv2d_6_PSNR: 28.2306 - conv2d_7_PSNR: 28.0831 - val_loss: 0.0210 - val_conv2d_6_loss: 0.0105 - val_conv2d_7_loss: 0.0106 - val_conv2d_6_PSNR: 28.3589 - val_conv2d_7_PSNR: 28.3591
Epoch 10/15
62/62 [==============================] - 64s 1s/step - loss: 0.0211 - conv2d_6_loss: 0.0105 - conv2d_7_loss: 0.0106 - conv2d_6_PSNR: 28.3884 - conv2d_7_PSNR: 28.3058 - val_loss: 0.0209 - val_conv2d_6_loss: 0.0104 - val_conv2d_7_loss: 0.0105 - val_conv2d_6_PSNR: 28.5111 - val_conv2d_7_PSNR: 28.5113
Epoch 11/15
62/62 [==============================] - 64s 1s/step - loss: 0.0209 - conv2d_6_loss: 0.0104 - conv2d_7_loss: 0.0105 - conv2d_6_PSNR: 28.5939 - conv2d_7_PSNR: 28.5777 - val_loss: 0.0208 - val_conv2d_6_loss: 0.0104 - val_conv2d_7_loss: 0.0104 - val_conv2d_6_PSNR: 28.7240 - val_conv2d_7_PSNR: 28.6887
Epoch 12/15
62/62 [==============================] - 64s 1s/step - loss: 0.0208 - conv2d_6_loss: 0.0104 - conv2d_7_loss: 0.0104 - conv2d_6_PSNR: 28.7405 - conv2d_7_PSNR: 28.7578 - val_loss: 0.0207 - val_conv2d_6_loss: 0.0103 - val_conv2d_7_loss: 0.0104 - val_conv2d_6_PSNR: 28.7113 - val_conv2d_7_PSNR: 28.8480
Epoch 13/15
62/62 [==============================] - 65s 1s/step - loss: 0.0207 - conv2d_6_loss: 0.0104 - conv2d_7_loss: 0.0104 - conv2d_6_PSNR: 28.8306 - conv2d_7_PSNR: 28.8265 - val_loss: 0.0207 - val_conv2d_6_loss: 0.0103 - val_conv2d_7_loss: 0.0104 - val_conv2d_6_PSNR: 28.9158 - val_conv2d_7_PSNR: 28.6694
Epoch 14/15
62/62 [==============================] - 65s 1s/step - loss: 0.0207 - conv2d_6_loss: 0.0103 - conv2d_7_loss: 0.0103 - conv2d_6_PSNR: 28.9046 - conv2d_7_PSNR: 28.9179 - val_loss: 0.0206 - val_conv2d_6_loss: 0.0103 - val_conv2d_7_loss: 0.0104 - val_conv2d_6_PSNR: 28.8428 - val_conv2d_7_PSNR: 28.6873
Epoch 15/15
62/62 [==============================] - 64s 1s/step - loss: 0.0206 - conv2d_6_loss: 0.0103 - conv2d_7_loss: 0.0103 - conv2d_6_PSNR: 29.0041 - conv2d_7_PSNR: 28.9943 - val_loss: 0.0205 - val_conv2d_6_loss: 0.0102 - val_conv2d_7_loss: 0.0103 - val_conv2d_6_PSNR: 28.8136 - val_conv2d_7_PSNR: 29.1206
In [21]:
#plot loss metrics
plot_loss(history_6_residual)
In [22]:
#plot PSNR metrics
plot_PSNR(history_6_residual)
In [23]:
images_100 = np.asarray(next(n_samples(train_gen_72, 64)))

# type(train_gen_72)
preds_model_6 = model.predict_generator(images_100)
In [24]:
plot_impages(train_gen_72, train_gen_144, train_gen_288, preds_model_6)
In [ ]: